- #ifndef __ASM_SYSTEM_H
- #define __ASM_SYSTEM_H
-
- #include <linux/kernel.h>
- #include <asm/segment.h>
-
- #ifdef __KERNEL__
-
- struct task_struct; /* one of the stranger aspects of C forward declarations.. */
- extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
- /*
- * We do most of the task switching in C, but we need
- * to do the EIP/ESP switch in assembly..
- */
- #define switch_to(prev,next) do { \
- unsigned long eax, edx, ecx; \
- asm volatile("pushl %%ebx\n\t" \
- "pushl %%esi\n\t" \
- "pushl %%edi\n\t" \
- "pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %5,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %6\n\t" /* restore EIP */ \
- "jmp __switch_to\n" \
- "1:\t" \
- "popl %%ebp\n\t" \
- "popl %%edi\n\t" \
- "popl %%esi\n\t" \
- "popl %%ebx" \
- :"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
- "=a" (eax), "=d" (edx), "=c" (ecx) \
- :"m" (next->tss.esp),"m" (next->tss.eip), \
- "a" (prev), "d" (next)); \
- } while (0)
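-
- /*
-  * Illustrative sketch (not part of the original header): the scheduler is
-  * the expected caller of switch_to().  Assuming 'prev' is the task giving
-  * up the CPU and 'next' is the task chosen to run next:
-  *
-  *	if (prev != next)
-  *		switch_to(prev, next);
-  *
-  * Control returns past this point only once 'prev' is scheduled in again.
-  */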
-
- #define _set_base(addr,base) do { unsigned long __pr; \
- __asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
- #define _set_limit(addr,limit) do { unsigned long __lr; \
- __asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
-
- #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
- #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
- static inline unsigned long _get_base(char * addr)
- {
- unsigned long __base;
- __asm__("movb %3,%%dh\n\t"
- "movb %2,%%dl\n\t"
- "shll $16,%%edx\n\t"
- "movw %1,%%dx"
- :"=&d" (__base)
- :"m" (*((addr)+2)),
- "m" (*((addr)+4)),
- "m" (*((addr)+7)));
- return __base;
- }
-
- #define get_base(ldt) _get_base( ((char *)&(ldt)) )
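-
- /*
-  * Usage sketch (illustrative only, not part of the original source): these
-  * helpers read and patch the base/limit fields of an 8-byte segment
-  * descriptor in place.  'desc' and 'base' below are hypothetical names:
-  *
-  *	set_base(desc, 0x00100000);		base  = 1MB
-  *	set_limit(desc, 0x10000);		limit = 64KB
-  *	base = get_base(desc);
-  *
-  * Note that set_limit() stores ((limit)-1)>>12, i.e. a page-granular
-  * limit field.
-  */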
-
- /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
- #define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"m" (*(unsigned int *)&(value)))
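-
- /*
-  * Usage sketch (illustrative, not part of this header): reload %fs from a
-  * saved selector that may no longer be valid; the fixup section above
-  * silently falls back to the null selector instead of taking an unhandled
-  * fault:
-  *
-  *	loadsegment(fs, saved_fs);
-  *
-  * 'saved_fs' is a hypothetical variable holding a 16-bit selector value.
-  */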
-
- /*
-  * Clear and set the 'TS' bit in CR0, respectively
-  */
- #define clts() __asm__ __volatile__ ("clts")
- #define read_cr0() ({ \
- unsigned int __dummy; \
- __asm__( \
- "movl %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
- })
- #define write_cr0(x) \
- __asm__("movl %0,%%cr0": :"r" (x))
- #define stts() write_cr0(8 | read_cr0())
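-
- /*
-  * Illustrative sketch (not part of the original header): stts() sets the
-  * TS bit (bit 3) in CR0, so the next FPU instruction raises a "device not
-  * available" fault, while clts() clears it before FPU state is touched:
-  *
-  *	stts();		on a context switch, defer saving/restoring FPU state
-  *	...
-  *	clts();		about to touch the FPU, allow FPU instructions again
-  */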
-
- #endif /* __KERNEL__ */
-
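- /*
-  * LSL yields the segment limit (the highest valid offset), hence the +1
-  * to return the segment size in bytes.
-  */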
- static inline unsigned long get_limit(unsigned long segment)
- {
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
- }
-
- #define nop() __asm__ __volatile__ ("nop")
-
- #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
- #define tas(ptr) (xchg((ptr),1))
-
- struct __xchg_dummy { unsigned long a[100]; };
- #define __xg(x) ((struct __xchg_dummy *)(x))
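-
- /*
-  * The oversized dummy struct makes the "m" constraints below refer to a
-  * full-sized object rather than a single byte, so gcc assumes the whole
-  * target of 'ptr' may be read and written by the asm.
-  */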
-
- /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- */
- static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
- {
- switch (size) {
- case 1:
- __asm__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
- }
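-
- /*
-  * Usage sketch (illustrative only): xchg() atomically swaps a new value
-  * into memory and returns the old contents, so tas() can serve as a crude
-  * test-and-set lock.  'lock_word' here is a hypothetical int shared with
-  * another CPU or an interrupt handler:
-  *
-  *	while (tas(&lock_word))
-  *		;			(spin until the old value was 0)
-  *	...critical section...
-  *	lock_word = 0;
-  */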
-
- /*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPUs follow what Intel calls a *Processor Order*,
- * in which all writes are seen in program order even
- * outside the CPU.
- *
- * I expect future Intel CPUs to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- */
- #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
- #define rmb() mb()
- #define wmb() __asm__ __volatile__ ("": : :"memory")
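-
- /*
-  * Illustrative example (not from the original source): a driver handing a
-  * descriptor to a bus-mastering device wants its stores seen in order:
-  *
-  *	desc->addr   = buf_phys;
-  *	desc->len    = len;
-  *	wmb();				descriptor fields first...
-  *	desc->status = DESC_OWNED;	...then ownership to the device
-  *
-  * 'desc', 'buf_phys', 'len' and DESC_OWNED are hypothetical driver names.
-  */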
-
- /* interrupt control.. */
- #define __sti() __asm__ __volatile__ ("sti": : :"memory")
- #define __cli() __asm__ __volatile__ ("cli": : :"memory")
- #define __save_flags(x) \
- __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
- #define __restore_flags(x) \
- __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
-
-
- #ifdef __SMP__
-
- extern void __global_cli(void);
- extern void __global_sti(void);
- extern unsigned long __global_save_flags(void);
- extern void __global_restore_flags(unsigned long);
- #define cli() __global_cli()
- #define sti() __global_sti()
- #define save_flags(x) ((x)=__global_save_flags())
- #define restore_flags(x) __global_restore_flags(x)
-
- #else
-
- #define cli() __cli()
- #define sti() __sti()
- #define save_flags(x) __save_flags(x)
- #define restore_flags(x) __restore_flags(x)
-
- #endif
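-
- /*
-  * Usage sketch (illustrative only): protect a short critical section from
-  * interrupts while preserving the caller's interrupt state.  On SMP these
-  * map to the __global_* variants declared above:
-  *
-  *	unsigned long flags;
-  *
-  *	save_flags(flags);
-  *	cli();
-  *	...touch data shared with an interrupt handler...
-  *	restore_flags(flags);
-  */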
-
- /*
-  * Disable HLT during certain critical I/O operations.
-  */
- #define HAVE_DISABLE_HLT
- void disable_hlt(void);
- void enable_hlt(void);
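-
- /*
-  * Illustrative sketch (not part of the original header): a driver doing
-  * timing-critical polled I/O can keep the idle loop from executing HLT
-  * while the operation is in flight:
-  *
-  *	disable_hlt();
-  *	...timing-sensitive i/o...
-  *	enable_hlt();
-  */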
-
- #endif
-